home
***
CD-ROM
|
disk
|
FTP
|
other
***
search
/
Language/OS - Multiplatform Resource Library
/
LANGUAGE OS.iso
/
pcr
/
pcr4_4.lha
/
DIST
/
gc
/
INCLUDE
/
xr
/
GCPrivate.h
< prev
next >
Wrap
C/C++ Source or Header
|
1992-04-16
|
42KB
|
1,182 lines
/* begincopyright
Copyright (c) 1988-1991 Xerox Corporation. All rights reserved.
Use and copying of this software and preparation of derivative works based
upon this software are permitted. Any distribution of this software or
derivative works must comply with all applicable United States export
control laws. This software is made available AS IS, and Xerox Corporation
makes no warranty about the software, its performance or its conformity to
any specification. Any person obtaining a copy of this software is requested
to send their name and post office or electronic mail address to:
PCR Coordinator
Xerox PARC
3333 Coyote Hill Rd.
Palo Alto, CA 94304
Parts of this software were derived from code bearing the copyright notice:
Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
This material may be freely distributed, provided this notice is retained.
This material is provided as is, with no warranty expressed or implied.
Use at your own risk.
endcopyright */
/* Machine specific parts contributed by various other people. */
/* This version assumes a table of heap pages is maintained. */
/*
* Derived from both the Boehm-Demers collector, and the initial PCR
* collector built by Weiser, Hauser et al..
* Boehm, May 29, 1991 1:08:24 pm PDT
*/
#ifndef _XR_GCPrivate_
#define _XR_GCPrivate_ 1
# include "xr/GC.h"
# include "xr/ThreadsBackdoor.h"
typedef unsigned long word;
/*********************************/
/* */
/* Definitions for conservative */
/* collector */
/* */
/*********************************/
/*********************************/
/* */
/* Easily changeable parameters */
/* */
/*********************************/
# if defined(sun) && defined(mc68000)
# define M68K_SUN
# define mach_type_known
# endif
# if defined(hp9000s300)
# define M68K_HP
# define mach_type_known
# endif
# if defined(vax)
# define VAX
# define mach_type_known
# endif
# if defined(mips)
# define MIPS
# define mach_type_known
# endif
# if defined(sequent) && defined(i386)
# define I386
# define mach_type_known
# endif
# if defined(ibm032)
# define RT
# define mach_type_known
# endif
# if defined(sun) && defined(sparc)
# define SPARC
# define mach_type_known
# endif
# if defined(_IBMR2)
# define IBMRS6000
# define mach_type_known
# endif
/* Feel free to add more clauses here */
/* Or manually define the machine type here: */
# ifndef mach_type_known
# define M68K_SUN /* Guess "Sun" */
/* Mapping is: M68K_SUN ==> Sun3, */
/* M68K_HP ==> HP9000/300, */
/* I386 ==> Sequent Symmetry, */
/* NS32K ==> Encore Multimax, */
/* MIPS ==> R2000 or R3000 */
# endif
#define GATHERSTATS /* Compute garbage collection statistics not necessary */
/* for normal GC operation. */
#define PRINTSTATS
/* Print garbage collection statistics */
/* For less verbose output, undefine in reclaim.c */
#define PRINTTIMES
/* Print the amount of time consumed by each garbage */
/* collection. */
#define PRINTBLOCKS /* Print object sizes associated with heap blocks, */
/* whether the objects are atomic or composite, and */
/* whether or not the block was found to be empty */
/* during the reclaim phase. Typically generates */
/* about one screenful per garbage collection. */
#undef PRINTBLOCKS
#define REPORT_FAILURE /* Print values that looked "almost" like pointers */
#undef REPORT_FAILURE
#ifdef SILENT
# ifdef PRINTSTATS
# undef PRINTSTATS
# endif
# ifdef PRINTTIMES
# undef PRINTTIMES
# endif
# ifdef PRINTNBLOCKS
# undef PRINTNBLOCKS
# endif
#endif
#if defined(PRINTSTATS) && !defined(GATHERSTATS)
# define GATHERSTATS
#endif
#define MAP_SIZE 24576 /* total data size < MAP_SIZE * HBLKSIZE = 96 Meg */
#define INTERIOR_POINTERS
/* Follow pointers to the interior of an object. */
/* Substantially increases the probability of */
/* unnecessary space retention. May be necessary */
/* with gcc -O or other C compilers that may clobber */
/* values of dead variables prematurely. Pcc */
/* derived compilers appear to pose no such problems. */
/* Empirical evidence suggests that this is probably */
/* still OK for most purposes, so long as pointers */
/* are known to be 32 bit aligned. The combination */
/* of INTERIOR_POINTERS and UNALIGNED (e.g. on a */
/* Sun 3 with the standard compiler) causes easily */
/* observable spurious retention and performance */
/* degradation. */
#define RESTRICTED_INTERIOR_POINTERS
/* Accept pointers into the middle of objects only */
/* if the displacement from the beginning of the */
/* object is one previously registered with */
/* GC_register_displacement. */
/* Registered displacements must be expressible */
/* in words, and must be < MAX_OFFSET */
#define INTERIOR_STACK_POINTERS
/* Accept pointers into the middle of objects */
/* from the stack area even if they are not */
/* otherwise accepted. If this is defined, then */
/* RESTRICTED_INTERIOR_POINTERS applies only to */
/* the heap and to static data areas. */
#define BLACK_LIST
/* Maintain a list of pages that have known false */
/* references to them, and should thus not contain */
/* objects that may pin large amounts of memory. */
#ifdef SPARC
/* On a SPARC, this appears to result in lots of false hits, since */
/* the architecture and compilers are sloppy about stack space */
/* usage. */
# undef INTERIOR_STACK_POINTERS
#endif
#ifdef RESTRICTED_INTERIOR_POINTERS
# define MAX_OFFSET 4096
# define MAP_CACHE_SZ 20 /* Maps of block layout are built for */
/* MAP_CACHE_SZ different allocation sizes */
/* less than MAXOBJSZ. */
#endif
#ifdef BLACK_LIST
# ifdef INTERIOR_STACK_POINTERS
--> not implemented correctly
# endif
# if defined INTERIOR_POINTERS && !defined(RESTRICTED_INTERIOR_POINTERS)
--> not implemented correctly
# endif
#endif
#define SEPARATE_HEADERS
/* Keep mark bits and other information associated */
/* with heap blocks separate from the heap blocks */
/* themselves. Especially in combination with */
/* DIRTY_BITS, this can significantly reduce */
/* page faults for large address spaces. In the */
/* case of non-32-bit aligned pointers, it still */
/* makes sense to discard the beginning of blocks */
/* however, since there are otherwise too many bogus */
/* references to these locations. */
#ifdef SEPARATE_HEADERS
# define DISCARD_WORDS 16 /* Discard this many words at the beginning */
/* of each block, if pointers are not aligned */
/* The definition of quicktest below assumes a value */
/* of at least 8. */
# define ALIGNED_DISCARD_WORDS 0 /* Analogous value if pointers are */
/* aligned. Less critical, since */
/* bogus pointers to the beginning */
/* of a block are less likely. */
#endif
#ifdef SPARC
# define ALIGN_DOUBLE /* Align objects of size > 1 word on 2 word */
/* boundaries. Wasteful of memory, but */
/* apparently required by SPARC architecture. */
#endif
#define FREE_MEM_RATIO 4
/* Initial value of GC_free_mem_ratio. Should */
/* be >= 1. */
#define MAX_HINCR 1024 /* Maximum number of heap blocks by which to expand */
/* the heap at once (unless forced). */
#define MIN_HINCR 256 /* Minimum number of heap blocks by which to expand */
/* the heap at once. */
#define DIRTY_BITS
/* Virtual dirty bits are available, enabling the */
/* following strategy (unless GC_mode dictates */
/* otherwise): */
/* Perform frequent "ephemeral" collections to */
/* reduce the frequency of full collections. */
/* Clear mark bits only for full garbage */
/* collection. For the other collections, leave */
/* mark bits set from the preceding collection, */
/* and treat marked objects on dirty pages as roots.*/
/* Try to reclaim objects only on dirty pages. */
/* In order to keep as many pages as possible clean */
/* we do not allocate objects that are expected to */
/* be shortlived on pages that satisfy the TOO_FULL */
/* criterion. */
# ifdef IBMRS6000
/* We don't have virtual dirty bits on an RS/6000 yet */
# undef DIRTY_BITS
# endif
# define TENURE(empty_words) \
(BYTES_TO_WORDS(HBLKSIZE) > ((empty_words) << 2))
/* At most 1/4 can still be allocated */
/* Criterion for tenuring a heap block. If a heap */
/* block is sufficiently full, we stop allocating */
/* from it to keep it clean and to improve */
/* allocation locality. */
/* empty_words are the number of remaining words in */
/* the heap block that can be allocated. */
# define MIN_COMPOSITE_IN_USE 500000
/* Minimum on accessible composite words used in */
/* garbage collection decisions. Large values */
/* inhibit frequent collections during startup */
/* but generate longer pauses. */
#define FULL_GC_ALLOCS 0 /* Invoke the full collector after this many */
/* words are allocated. Note that allocation */
/* count is reset to 0 after any kind of */
/* collection. */
/* This is only a default value. */
/* 0 ==> collect only when full. A 0 value */
/* is highly recommended at the moment. */
#define MERGE_SIZES /* Round up some object sizes, so that fewer distinct */
/* free lists are actually maintained. This applies */
/* only to the top level routines in misc.c, not to */
/* user generated code that calls allocobj and */
/* allocaobj directly. */
/* Slows down average programs slightly. May however */
/* substantially reduce fragmentation if allocation */
/* request sizes are widely scattered. */
/* ALIGN_DOUBLE requires MERGE_SIZES at present. */
# if defined(ALIGN_DOUBLE) && !defined(MERGE_SIZES)
# define MERGE_SIZES
# endif
/* RESTRICTED_INTERIOR_POINTERS requires INTERIOR_POINTERS */
# if defined(RESTRICTED_INTERIOR_POINTERS) && !defined(INTERIOR_POINTERS)
# define INTERIOR_POINTERS
# endif
#define VISUALIZE /* Make calls to Mark Weiser's GC visualizer */
#undef VISUALIZE
/* For PRINTTIMES to tell the truth, we need to know the value of HZ for
this system. */
#if defined(M68K_HP) || defined(M68K_SUN) || defined(SPARC)
# include <sys/param.h>
# define LOCAL_HZ HZ
#else
# define LOCAL_HZ 60 /* Guess that we're in the U.S. */
#endif
#ifdef M68K_SUN
# define UNALIGNED /* Pointers are not longword aligned */
# define ALIGNMENT 2 /* Pointers are aligned on 2 byte boundaries */
/* by the Sun C compiler. */
#else
# ifdef VAX
# undef UNALIGNED /* Pointers are longword aligned by 4.2 C compiler */
# define ALIGNMENT 4
# else
# ifdef RT
# undef UNALIGNED
# define ALIGNMENT 4
# else
# ifdef SPARC
# undef UNALIGNED
# define ALIGNMENT 4
# else
# ifdef I386
# undef UNALIGNED /* Sequent compiler aligns pointers */
# define ALIGNMENT 4
# else
# ifdef NS32K
# undef UNALIGNED /* Pointers are aligned on NS32K */
# define ALIGNMENT 4
# else
# ifdef MIPS
# undef UNALIGNED /* MIPS hardware requires pointer */
/* alignment */
# define ALIGNMENT 4
# else
# ifdef M68K_HP
# define UNALIGNED
# define ALIGNMENT 2 /* 2 byte alignment inside struct/union, */
/* 4 bytes elsewhere */
# else
# ifdef IBMRS6000
# undef UNALIGNED
# define ALIGNMENT 4
# else
--> specify alignment <--
# endif
# endif
# endif
# endif
# endif
# endif
# endif
# endif
#endif
# define GC_MULT 3 /* Don't collect if the fraction of */
/* non-collectable memory in the heap */
/* exceeds GC_MULT/GC_DIV */
# define GC_DIV 4
# define NON_GC_HINCR MIN_HINCR
/* Heap increment to use if collection */
/* was suppressed because most of heap is not */
/* collectable */
/* heap address bounds. These are extreme bounds used for sanity checks. */
/* HEAPLIM may have to be increased for machines with incredibly large */
/* amounts of memory. */
#ifdef RT
# define HEAPSTART 0x10000000
# define HEAPLIM 0x1fff0000
#else
# if defined(M68K_SUN) || defined(M68K_HP)
# define HEAPSTART 0x00010000
# define HEAPLIM 0x04000000
# else
# ifdef SPARC
# define HEAPSTART 0x00010000
# define HEAPLIM 0x10000000
# else
# ifdef VAX
# define HEAPSTART 0x400
# define HEAPLIM 0x10000000
# else
# ifdef I386
# define HEAPSTART 0x1000
# define HEAPLIM 0x10000000
# else
# ifdef NS32K
# define HEAPSTART 0x2000
# define HEAPLIM 0x10000000
# else
# ifdef MIPS
# define HEAPSTART 0x10000000
# define HEAPLIM 0x20000000
# else
# ifdef IBMRS6000
# define HEAPSTART 0x80000000
# define HEAPLIM 0x8ff70000
# else
--> values unknown <--
# endif
# endif
# endif
# endif
# endif
# endif
# endif
#endif
/*********************************/
/* */
/* Machine-dependent defines */
/* */
/*********************************/
#define WORDS_TO_BYTES(x) ((x)<<2)
#define BYTES_TO_WORDS(x) ((x)>>2)
#define WORDSZ 32
#define LOGWL 5 /* log[2] of above */
#define BYTES_PER_WORD (sizeof (word))
#define modHALFWORDSZ(n) ((n) & 0xf) /* mod n by size of half word */
#define divHALFWORDSZ(n) ((n) >> 4) /* divide n by size of half word */
#define modWORDSZ(n) ((n) & 0x1f) /* mod n by size of word */
#define divWORDSZ(n) ((n) >> 5) /* divide n by size of word */
/*********************/
/* */
/* Size Parameters */
/* */
/*********************/
/* heap block size, bytes */
/* for RT see comment below */
#define HBLKSIZE 0x1000 /* Must be power of 2 */
/* max size objects supported by freelist (larger objects may be */
/* allocated, but less efficiently) */
/* asm(".set MAXOBJSZ,0x200") if HBLKSIZE/2 == 0x200 */
#define MAXOBJSZ (HBLKSIZE/8)
/* Should be BYTES_TO_WORDS(HBLKSIZE/2), but a cpp */
/* misfeature prevents that. */
# define divHBLKSZ(n) ((n) >> 12)
# define modHBLKSZ(n) ((n) & (HBLKSIZE-1))
# define HBLKPTR(objptr) ((struct hblk *)(((long) (objptr)) & ~(HBLKSIZE-1)))
/********************************************/
/* */
/* H e a p B l o c k s */
/* */
/********************************************/
/* heap block header */
#define HBLKMASK (HBLKSIZE-1)
#define BITS_PER_HBLK (HBLKSIZE * 8)
#define MARK_BITS_PER_HBLK (BITS_PER_HBLK/WORDSZ)
/* upper bound */
/* We allocate 1 bit/word. Only the first word */
/* in each object is actually marked. */
# ifdef ALIGN_DOUBLE
# define MARK_BITS_SZ (((MARK_BITS_PER_HBLK + 2*WORDSZ - 1)/(2*WORDSZ))*2)
# else
# define MARK_BITS_SZ ((MARK_BITS_PER_HBLK + WORDSZ - 1)/WORDSZ)
# endif
/* Upper bound on number of mark words per heap block */
/* Per-heap-block bookkeeping record.  With SEPARATE_HEADERS these */
/* records live apart from the blocks themselves (indexed via the */
/* HDR() macro / headers table); otherwise one is embedded at the */
/* front of each heap block.  Field order matters: Misc.c relies on */
/* hbh_sz being first (it doubles as the free-list link field when */
/* SEPARATE_HEADERS is defined -- see link()/set_link() below). */
struct hblkhdr {
long hbh_sz; /* sz > 0 ==> objects are sz-tuples of poss. pointers */
/* sz < 0 ==> objects are sz-tuples not pointers */
/* if free, the size in bytes of the whole block */
/* Misc.c knows that hbh_sz comes first. */
struct hblk * hbh_next; /* Link field for hblk free list */
struct hblk * hbh_sz_link;
/* Next heap block containing objects of the */
/* same size as this one, on which garbage */
/* objects have not yet been reclaimed. */
char hbh_uninit; /* If this is a free heap block, then it has not been */
/* cleared. */
char hbh_tenured; /* This block should not be reclaimed */
/* because it contains too few empty */
/* objects. */
/* Allocated blocks only. */
unsigned char hbh_last_reclaimed; /* gc_no when block was last */
/* reclaimed or reallocated. Not */
/* valid for large object blocks. */
/* Used defensively, since it */
/* might wrap around. */
char hbh_busy; /* Currently being reclaimed. */
/* Valid only for allocated blocks. */
long hbh_marks[MARK_BITS_SZ];
/* Bit i in the array refers to the */
/* object starting at the ith word (header */
/* INCLUDED) in the heap block. */
/* For free blocks, hbh_marks[0] = 1, indicates */
/* block is uninitialized. */
/* For allocated blocks, hbh_marks[0] = 1 */
/* indicates that the block should not be */
/* reclaimed during a partial collection. */
/* This should be changed if HBLKSIZE is */
/* decreased. */
};
/* heap block body */
# ifdef SEPARATE_HEADERS
# ifdef UNALIGNED
# define BODY_SZ ((HBLKSIZE-WORDS_TO_BYTES(DISCARD_WORDS))/sizeof(word))
struct hblk {
word garbage[DISCARD_WORDS];
word hb_body[BODY_SZ];
};
# define HDR_BYTES WORDS_TO_BYTES(DISCARD_WORDS)
# else
# define BODY_SZ ((HBLKSIZE \
-WORDS_TO_BYTES(ALIGNED_DISCARD_WORDS))/sizeof(word))
struct hblk {
# if ALIGNED_DISCARD_WORDS != 0
word garbage[ALIGNED_DISCARD_WORDS];
# endif
word hb_body[BODY_SZ];
};
# define HDR_BYTES WORDS_TO_BYTES(ALIGNED_DISCARD_WORDS)
# endif
# else
# define BODY_SZ ((HBLKSIZE-sizeof(struct hblkhdr))/sizeof(word))
struct hblk {
struct hblkhdr hb_hdr;
word hb_body[BODY_SZ];
};
# define HDR_BYTES (sizeof (struct hblkhdr))
# endif
# define HDR_WORDS BYTES_TO_WORDS(HDR_BYTES)
# ifdef SEPARATE_HEADERS
# define HDR(h) headers[divHBLKSZ((unsigned)(h) - (unsigned) GC_heapstart)]
# define hb_sz(h) (HDR(h) -> hbh_sz)
# define hb_sz_from_hdr(hhdr) (hhdr -> hbh_sz)
# define hb_marks(h) (HDR(h) -> hbh_marks)
# define hb_next(h) (HDR(h) -> hbh_next)
# define hb_sz_link(h) (HDR(h) -> hbh_sz_link)
# define hb_sz_link_from_hdr(hhdr) (hhdr -> hbh_sz_link)
# define hb_uninit(h) (HDR(h) -> hbh_uninit)
# define hb_tenured(h) (HDR(h) -> hbh_tenured)
# define hb_busy(h) (HDR(h) -> hbh_busy)
# define hb_busy_from_hdr(hhdr) (hhdr -> hbh_busy)
/* Cannot use mark bits, since they can accidentally get set if the */
/* block is in use. */
# define hb_last_reclaimed(h) (HDR(h) -> hbh_last_reclaimed)
# else
# define hb_sz(h) ((h) -> hb_hdr.hbh_sz)
# define hb_marks(h) ((h) -> hb_hdr.hbh_marks)
# define hb_next(h) ((h) -> hb_hdr.hbh_next)
# define hb_sz_link(h) ((h) -> hb_hdr.hbh_sz_link)
# define hb_uninit(h) ((h) -> hb_hdr.hbh_uninit)
# define hb_tenured(h) ((h) -> hb_hdr.hbh_tenured)
# define hb_busy(h) ((h) -> hb_hdr.hbh_busy)
# define hb_last_reclaimed(h) ((h) -> hb_hdr.hbh_last_reclaimed)
# ifdef ALIGN_DOUBLE
# define hb_dummy(h) ((h) -> hb_hdr.hbh_dummy)
# endif
# endif
# ifdef SEPARATE_HEADERS
/* Free list for heap block headers */
typedef struct hblkhdr hdr;
# define HDR_NIL ((hdr *)0)
# define link(h) ((hdr *)((h) -> hbh_sz)) /* Link field for free list */
# define set_link(h, p) (h) -> hbh_sz = (long)(p);
# endif
/****************************/
/* */
/* Objects */
/* */
/****************************/
/* object structure */
/* A heap object, viewed either as a free-list node (oun_link) or as */
/* an array of words (oun_component).  Free-list links are stored */
/* bit-complemented ("encrypted") via ENCRYPT_POINTER/DECRYPT_POINTER */
/* below, so a stray word that happens to equal a link value does not */
/* look like a pointer to the conservative marker. */
struct obj {
union {
word oun_link; /* --> next object in freelist */
/* "encrypted" using macros */
/* below. This prevents a */
/* false reference to a free */
/* list from pinning the */
/* entire list. */
word oun_component[1]; /* treats obj as list of words */
# define obj_component obj_un.oun_component
} obj_un;
};
# define ENCRYPT_POINTER(p) (~((word) (p)))
# define DECRYPT_POINTER(p) (struct obj *)(~(p))
# define get_obj_link(p) DECRYPT_POINTER((p) -> obj_un.oun_link)
# define set_obj_link(p,q) ((p) -> obj_un.oun_link = ENCRYPT_POINTER(q))
/* Test whether something points to a legitimate heap object */
/* Check whether the given HBLKSIZE aligned hblk pointer refers to the */
/* beginning of a legitimate chunk. */
/* Assumes that *p is addressable */
# define is_hblk(p) (hblkmap[divHBLKSZ(((long)p) - ((long)GC_heapstart))] \
== HBLK_VALID)
# ifdef INTERIOR_POINTERS
/* Return the hblk_map entry for the pointer p */
# define get_map(p) (hblkmap[divHBLKSZ(((long)p) - ((long)GC_heapstart))])
# endif
# define WORD_NO(obj_ptr, hblk_ptr) (((word *) obj_ptr) - ((word *) hblk_ptr))
# define IDIV(x,y) ((x)/(y))
/* Allows replacement with a faster implementation on a SPARC. */
/* We no longer consider this worth the table size, since the */
/* collector calls it rarely. */
/* Return the word displacement of the beginning of the object that */
/* includes the word displacement w. */
/* Assumes sz <= MAXOBJSZ. */
/* Second version assumes obj_map[sz] is not nil. */
# define adjusted_word_no(w,sz) \
(IDIV((w - HDR_WORDS), (sz)) * (sz) + HDR_WORDS)
# ifdef INTERIOR_POINTERS
# define mapped_adjusted_word_no(w,sz) (w - obj_map[sz][w])
# ifdef RESTRICTED_INTERIOR_POINTERS
/* Check whether w is a valid word offset into a heap block for */
/* objects of size sz. Assumes sz < MAXOBJSZ and that w < size */
/* of one heap block, but > HDR_WORDS. */
/* May lie about pointers past the last object in the block. */
# define is_proper_obj(w,sz) \
valid_offsets[(w - HDR_WORDS) % (sz)]
# else
# define is_proper_obj(w,sz) 1
# endif
# else /* no interior pointers */
# define is_proper_obj(w,sz) \
((w - HDR_WORDS) % (sz) == 0)
# endif
/* The following is a quick test whether something is an object pointer */
/* It may err in the direction of identifying bogus pointers */
/* Assumes heap + text + data + bss < 256 Meg */
#ifdef M68K_SUN
# define TMP_POINTER_MASK 0xf0000003 /* pointer & POINTER_MASK should be 0 */
#else
# ifdef RT
# define TMP_POINTER_MASK 0xc0000003
# else
# ifdef VAX
# define TMP_POINTER_MASK 0xf0000003
# else
# ifdef SPARC
# define TMP_POINTER_MASK 0xf0000003
# else
# ifdef I386
# define TMP_POINTER_MASK 0xf0000003
# else
# ifdef NS32K
# define TMP_POINTER_MASK 0xf0000003
# else
# ifdef MIPS
# define TMP_POINTER_MASK 0xc0000003
# else
# ifdef M68K_HP
# define TMP_POINTER_MASK 0xf0000003
# else
# ifdef IBMRS6000
# define TMP_POINTER_MASK 0x70000003
# else
--> dont know <--
# endif
# endif
# endif
# endif
# endif
# endif
# endif
# endif
#endif
#ifdef INTERIOR_POINTERS
# define POINTER_MASK (TMP_POINTER_MASK & 0xfffffff8)
/* Don't pay attention to whether address is properly aligned */
#else
# define POINTER_MASK TMP_POINTER_MASK
#endif
#ifdef UNALIGNED
# define quicktest(p) (((long)(p)) > ((long)(GC_heapstart)) \
&& !(((unsigned long)(p)) & POINTER_MASK) \
&& (((long)(p)) & (HBLKMASK ^ 0x1f)))
#else
# ifdef IBMRS6000
# define quicktest(p) (((unsigned long)(p)) \
> ((unsigned long)(GC_heapstart)) \
&& !(((unsigned long)(p)) & POINTER_MASK))
# else
# define quicktest(p) (((long)(p)) > ((long)(GC_heapstart)) \
&& !(((unsigned long)(p)) & POINTER_MASK))
# endif
#endif
/* Marks are in a reserved area in each header. */
/* Each word has one mark bit associated */
/* with it. Only those corresponding to the beginning of an */
/* object are used. */
/* Operations */
/*
* Retrieve, set, clear the mark bit corresponding
* to the nth word in a given heap block.
* Note that retrieval will work, so long as *hblk is addressable,
* or there is a legitimate header associated with h.
* In particular, the check whether hblk is a legitimate heap block
* can be postponed until after the mark bit is examined.
*
* (Recall that bit n corresponds to object beginning at word n)
*/
# define mark_bit(hblk,n) ((hb_marks(hblk)[divWORDSZ(n)] \
>> (modWORDSZ(n))) & 1)
# ifdef SEPARATE_HEADERS
# define mark_bit_from_hdr(hblkhdr,n) (((hblkhdr) -> hbh_marks[divWORDSZ(n)] \
>> (modWORDSZ(n))) & 1)
# endif
/* The following assume the mark bit in question is either initially */
/* cleared or it already has its final value */
# define set_mark_bit(hblk,n) hb_marks(hblk)[divWORDSZ(n)] \
|= 1 << modWORDSZ(n)
# ifdef SEPARATE_HEADERS
# define set_mark_bit_from_hdr(hblkhdr,n) \
(hblkhdr) -> hbh_marks[divWORDSZ(n)] \
|= 1 << modWORDSZ(n)
# endif
# define clear_mark_bit(hblk,n) hb_marks(hblk)[divWORDSZ(n)] \
&= ~(1 << modWORDSZ(n))
/* Data structure for list of uncollectable objects. */
struct uncollectable_structure {
struct uncollectable_structure *next; /* Singly linked list of entries. */
char *addr1; /* Bounds of the uncollectable region; presumably */
char *addr2; /* [addr1, addr2) -- confirm against the code that scans it. */
};
/* List of data segments to be used as roots. */
/* One root segment: a static data area scanned for pointers. */
/* Entries live in GC_data_list (declared below). */
struct data_list {
char *start, *end; /* Segment bounds; presumably [start, end) -- verify. */
};
/* Procedures private to the collector. */
/* Small object allocation routines. Ensure nonempty free list. */
/* Take size (in words) as argument. */
bool GC_allocobj(/* unsigned size_in_words */);
bool GC_allocaobj(/* unsigned size_in_words */);
/* Heap block allocation procedures */
extern void GC_freehblk(/* struct hblk * h */);
extern struct hblk * GC_allochblk(/* long signed_obj_size_in_words */);
void GC_add_hblklist(/* struct hblk * hbp*/);
void GC_del_hblklist(/* struct hblk * hbp*/);
/* Heap expansion: */
struct hblk * GC_get_sys_mem(/* unsigned long size_in_bytes */);
/* Get a page aligned block of memory. Assumes */
/* we have the allocation monitor lock. */
/* returns (struct hblk *)0 on failure. */
struct hblk * GC_get_sys_mem_initial(/* unsigned long size_in_bytes */);
/* Similar to the above, but intended to be */
/* called before the threads world starts up. */
bool GC_expand_hp(/* unsigned size_in_hblks, bool initial */);
/* Get more system memory and add it to heap. */
/* Assumes we hold allocation lock. */
/* Returns TRUE on success. */
/* Initial ==> threads world hasn't started */
/* yet. */
/* Utilities */
void GC_printf(/* char * fmt, ... */);
void GC_vprintf(/* char * fmt, ... */); /* Print only at high verbosity */
void GC_iprintf(/* char * fmt, ... */); /* Print important info */
void GC_abort(/* char * msg_without_nl */);
void GC_RunExclusive(/* void (*proc), XR_Pointer arg */);
void GC_RunParallel(/* XR_MesaProc mp */);
void GC_lock_to_fixed_vp();
/* myself to vp 0, so that the UNIX stack can be used. */
unsigned long GC_check_faults(); /* Number of page faults so far. */
/* Heap block partitioning (alloc.c): */
bool GC_new_hblk(/* long signed_obj_size_in_words */);
/* allocate block and build free list. */
/* Heap block header allocation: */
/* Heap block header management (SEPARATE_HEADERS builds only). */
# ifdef SEPARATE_HEADERS
void GC_get_header(/* struct hblk *h */);
/* Make sure that a header is available for heap block h */
void GC_use_as_headers(/* word *h, long n*/);
/* Release the block at h of size n * HBLKSIZE for use as heap */
/* block headers. Must be called sufficiently frequently that we */
/* never run out. */
# ifdef DIRTY_BITS
void GC_unprotect_headers();
/* Unprotect all pages in memory that include only headers. */
/* This interacts with ThreadsVirtualDirty.c. It may safely */
/* be called at any time, PROVIDED we do not care about dirty */
/* bit information on the headers. Calling it may improve */
/* performance. Note however, that it only affects the */
/* current virtual processor. */
/* This assumes that dirty bits are implemented by */
/* protecting memory. */
# endif /* DIRTY_BITS */
# endif /* SEPARATE_HEADERS */
/* Garbage collection, as a whole: */
/* The deamon runs in a sep thread and calls all of these routines. */
void GC_daemon(); /* Check whether a garbage collection should */
/* be started. If so, start one of the */
/* appropriate flavor. */
void GC_full_collection(/* bool stop_the_world */); /* do a full collection */
void GC_partial_collection(); /* do a partial collection */
void GC_freeze_and_start_reclaim(); /* finish the mark and start the reclaim */
void GC_mark_with_constant_stack(/* int flags */);
/* nail the process to a stack and do */
/* some marking. */
void GC_mark_with_constant_stack_inner(/* XR_Pointer clientData */);
/* internal routine for above */
void GC_parallel_mark(); /* run the partial parallel marker */
XR_Pointer GC_Partial_Mark_Inner(/* XR_MesaProc self */);
void GC_full_parallel_mark(); /* run the full marker */
XR_Pointer GC_Full_Mark_Inner(/*XR_MesaProc self*/);
void GC_exclusive_mark(); /* Garbage collect, assuming we */
/* own the allocation monitor */
/* lock. */
void GC_exclusive_mark_inner();
void GC_collector_setup();
void GC_heap_full(); /* Invoke whatever collection is appropriate */
/* for having run out of small object heap */
/* space. */
void GC_reclaim_hblks(/* long lw */);
/* Analogous to GC_heap_full, but we need a */
/* space for a large object of size lw words. */
/* Garbage collector mark phase: */
XR_Pointer GC_Mark_and_GCollect_Proc(/* int flags */);
/* Mark and the invoke collector, adjusting priorities */
/* appropriately. Return 0. */
XR_Pointer GC_Mark_Proc(/* int flags */);
/* Concurrently remark, adjusting priorities */
/* appropriately. Return 0. */
void GC_partial_parallel_gc();
/* start up a thread to mark and to perform a partial */
/* collection. */
void GC_full_parallel_mark();
/* Start a process that reclaims all pages that were */
/* reclaimed in the last cycle, flushes the rest, clears */
/* all mark bits, and remarks from scratch. */
/* Return when the process completes. */
void GC_wait_for_gc();
/* Wait until the current parallel collection is done. */
/* Requires GC_allocate_ml. */
void GC_mark_rescuers();
void GC_mark_roots(/* int mark_clean*/);
/* Push all (or dirty) roots onto the mark stack. */
/* See GC_mark_all for mark_clean values. */
void GC_mark(/* int alignment */); /* Mark everything on the mark stack */
void GC_clear_marks(); /* Clear all mark bits */
void GC_mark_all( /* word * b, word * t, int alignment, bool mark_clean */ );
# define POSSIBLY_DIRTY_POINTERS 2
# define DEFINITELY_DIRTY_POINTERS 3
# define ALL_POINTERS 4
/* Push all pointers between b and t onto the mark stack */
/* If mark_clean is POSSIBLY_DIRTY_ROOTS, ignore clean */
/* pages when convenient. If it is DEFINITELY_DIRTY_ROOTS */
/* also ignore pages without dirty bits. */
void GC_tl_mark(/* word * ptr */); /* Add ptr to the mark stack. (Slow.) */
void GC_tl_mark_all(/* word * b, word * t */);
/* Similar to GC_mark_all, but uses ALIGNMENT value */
/* defined here. */
bool GC_tl_mark_all_but_gc(/* word *b , word * t */);
/* Similar to the above, but excl. GC area. Returns TRUE */
/* Garbage collector sweep phase: */
bool GC_hblk_empty(/* struct hblk *hbp */);
/* Is a block empty? May err on no side. */
void GC_reclaim_entire_block(/* struct hblk *hbp */);
/* Make entire block allocatable again. */
bool GC_hblk_probably_tenurable();
bool GC_hblk_definitely_tenurable();
void GC_reclaim_composite();
void GC_reclaim_atomic();
void GC_flush_reclaim_pages();
void GC_reclaim();
/* Virtual dirty bits: */
void GC_clear_dirty_bits();
void GC_clear_some_dirty_bits(/* char * start, char * limit */);
void GC_get_dirty_bits();
void GC_set_dirty_bits();
void GC_propagate_dirty_bits();
/* Heap block map cache: */
void GC_flush_map_cache();
void GC_add_cache_entry();
/* Finalization: */
void GC_TraceFinalizableObjects();
/* Collector variables declared as globals, but intended to be private to */
/* the collector. */
/* These are declared in and initialized in globals.c, */
/* to ensure that they all end up in the data segment, adjacent to */
/* each other. */
extern char end; /* Used to find end of bss segment. Not included in */
/* globals.c. */
extern word GC_first_global;
extern unsigned GC_mode; /* Type of garbage collections to be performed. */
extern unsigned GC_requested_mode; /* Mode to switch to at next */
/* opportunity. */
# define DIRTY_BITS_REQUIRED (GC_INCREMENTAL | GC_PARALLEL)
extern struct hblk * GC_hblkfreelist; /* list of free heap blocks. */
# ifdef PRINTSTATS
long GC_reclaim_count; /* Number of reclaimed small object blocks */
# endif
/*
* Private versions of various GC statistics variables. These are cleared
* when mark bits are cleared. Thus, unlike the public versions, they do
* not contain plausible values during a parallel full collection.
*/
extern long GC_my_composite_in_use;
extern long GC_my_atomic_in_use;
extern long GC_my_objects_in_use;
extern bool GC_demand_collection; /* Set when there's an asynch request */
# ifdef SEPARATE_HEADERS
/* Free list for heap block headers */
extern hdr * GC_hdr_fl;
# endif
/* Monitor locks for allocation, and allocator callback respectively */
extern struct XR_MLRep GC_allocate_ml;
/* Protects free list and reclaim list data structures, as well as */
/* GC_collection_in_progress variable. */
/* Mark bits are valid only */
/* for pages which reside on a reclaim list. They may be set */
/* asynchronously on pages being reclaimed, but only if they are */
/* valid before and after the bit was set; this can only happen */
/* as a consequence of a misidentified pointer. */
extern struct XR_MLRep GC_alloc_callback_ml;
/* Finalization Queue: */
# ifdef FINALIZE
extern XR_FinalizationHandle GC_finalizeListHead;
# endif /* FINALIZE */
/* Garbage collector callbacks: */
extern RegisterGCCallbackType GC_callBackBefore, GC_callBackAfter,
GC_callBackDuring;
extern XR_Pointer GC_callBackBeforeClientData, GC_callBackAfterClientData,
GC_callBackDuringClientData;
/* Mark stack. We assume that we can use the UNIX stack for this purpose. */
/* All items between GC_mark_stack_top+1 and GC_mark_stack_bottom still */
/* need to be marked. All values on the stack are assumed to have passed */
/* quicktest, but are not guaranteed to be valid object pointers. */
/* Since we assume that we have the UNIX stack all to ourselves, */
/* GC_mark_stack_bottom is set exactly once on startup. */
word * GC_mark_stack_bottom;
extern word * GC_mark_stack_top;
# define STACKGAP 2048 /* Gap in longwords between hardware stack and */
/* the mark stack. */
/* Must suffice for printf calls and signal */
/* handling. */
# define PUSH_MS(ptr) *(--GC_mark_stack_top) = (word) ptr
# define USE_STACK /* Some code also allows a heap resident mark stack. */
/* List of root segments for marking: */
extern long GC_num_data; /* Number of entries in GC_data_list */
# define MAX_DATA_LIST 1000
extern struct data_list GC_data_list[MAX_DATA_LIST];
/* List of all data areas that must be treated as roots */
/* by the collector. */
/* lists of all heap blocks and free lists */
/* NOTE(review): the definitions below are tentative definitions in a */
/* header (no extern); they depend on common linkage to merge across */
/* translation units -- confirm this is the intended build model. */
struct obj * GC_aobjfreelist[MAXOBJSZ+1]; /* atomic object free list headers */
struct obj * GC_objfreelist[MAXOBJSZ+1]; /* composite object ... */
/* Note that the headers contain unencrypted pointers. Links are */
/* "encrypted" with ENCRYPT_POINTER. */
char GC_hblkmap[MAP_SIZE]; /* Indexed by offset from GC_heapstart */
 /* Identifies which blocks are administered */
 /* by collector. */
# define HBLK_INVALID 0 /* Not administered by collector */
# define HBLK_VALID 0x7f /* Beginning of a valid heap block */
/* A value n, 0 < n < 0x7f denotes the continuation of a valid heap */
/* block which starts at the current address - n * HBLKSIZE or earlier */
# ifdef BLACK_LIST
char GC_blacklist[MAP_SIZE]; /* Nonzero ==> false reference */
/* Map a heap block address to its entry in GC_blacklist, using */
/* divHBLKSZ rather than plain division on the offset: */
# define black_list(hbp) \
 GC_blacklist[divHBLKSZ(((long)(hbp)) - ((long) GC_heapstart))]
 /* This keeps some of Sun's compilers from calling .div */
# define is_black_listed(hbp) (black_list(hbp) != 0)
# endif
struct hblk * GC_reclaim_list[MAXOBJSZ+1]; /* Heap blocks of a given size */
 /* waiting for reclamation */
struct hblk * GC_areclaim_list[MAXOBJSZ+1]; /* Again, for atomic objects */
unsigned long GC_reclaim_inval_counter; /* Incremented whenever reclaim */
 /* lists or mark bits are */
 /* invalidated. Reclaim */
 /* procedures discard results */
 /* if this changed. */
 /* Protected by GC_allocate_ml. */
char * GC_obj_map[MAXOBJSZ+1];
 /* If not NIL, then a pointer to a map of valid */
 /* object addresses. hbh_map[sz][i] is j if the */
 /* address block_start+4*i is a valid pointer */
 /* to an object at block_start+4*(i-j). It is */
 /* OBJ_INVALID if block_start+4*i is not valid */
 /* as a pointer to an object. */
 /* We assume that all values of j <= OBJ_INVALID */
 /* Note that this is effectively a mapping on */
 /* longword addresses; low order bits must */
 /* be masked off before it can be applied. */
# define OBJ_INVALID 0x7f /* Marks an invalid entry in a GC_obj_map map. */
# ifdef RESTRICTED_INTERIOR_POINTERS
char GC_valid_offsets[MAX_OFFSET+1];
 /* valid_offsets[i] != 0 iff i is a valid byte offset */
 /* of a pointer from the beginning of the object. */
# endif
# ifdef SEPARATE_HEADERS
struct hblkhdr * GC_headers[MAP_SIZE];
 /* Headers for heap blocks administered by collector */
 /* Invalid for blocks not administered by collector. */
 /* Indexed in the same manner as hblkmap. */
# endif
char GC_map_cache[MAP_CACHE_SZ][HBLKSIZE/BYTES_PER_WORD];
 /* Space used to hold GC_obj_map maps */
# ifdef MERGE_SIZES
long GC_size_map[MAXOBJSZ+1]; /* Size i allocation requests are rounded */
 /* up to size GC_size_map[i]. */
# endif
/* Virtual-dirty-bit tracking (see dirty.c): */
extern char GC_dirty_bits[/* MAP_SIZE + slop for data segment */];
 /* != 0 ==> block may have been changed */
 /* since the last time the entry was */
 /* cleared using one of the routines in */
 /* dirty.c. */
extern char * VD_base; /* Lowest address represented in the */
 /* above array. Always HBLKSIZE aligned */
word GC_last_global;
 /* End marker for the GC's global data; used only via */
 /* end_gc_arrays below. */
/* Short unprefixed aliases for the GC_-prefixed globals above: */
# define objfreelist GC_objfreelist
# define aobjfreelist GC_aobjfreelist
# define reclaim_list GC_reclaim_list
# define areclaim_list GC_areclaim_list
# define hblkmap GC_hblkmap
# define obj_map GC_obj_map
# define map_cache GC_map_cache
# ifdef RESTRICTED_INTERIOR_POINTERS
# define valid_offsets GC_valid_offsets
# endif
# ifdef SEPARATE_HEADERS
# define headers GC_headers
# endif
/* NOTE(review): GC_first_global is not declared in this chunk; these */
/* bounds presumably rely on link order placing the GC globals between */
/* the two markers -- confirm against the defining module. */
# define begin_gc_arrays ((char *)(&GC_first_global))
# define end_gc_arrays ((char *)(&GC_last_global))
/* Magnitude of a (possibly negative) block size; what the sign of */
/* hb_sz encodes is not visible here -- presumably atomic vs. composite, */
/* TODO confirm. */
# define HB_SIZE(p) abs(hb_sz(p))
/* WARNING: this macro shadows the C library abs() in every file that */
/* includes this header, and it evaluates x twice -- do not pass */
/* side-effecting arguments (HB_SIZE above evaluates hb_sz(p) twice). */
# define abs(x) ((x) < 0? (-(x)) : (x))
struct uncollectable_structure * GC_pin_head;
 /* Head of list of uncollectable objects. */
# endif